/*
 * NOTE(review): this chunk is not compilable C -- it is a corrupted
 * fragment that interleaves unified-diff markers ('-' / '+' at line
 * start) with source text.  It appears to come from Xen's writable-
 * pagetable page-fault path (ptwr_do_page_fault and its caller), with
 * a half-applied patch that replaces a function-wide LOCK_BIGLOCK /
 * UNLOCK_BIGLOCK pair ('-' lines) by a narrower critical section
 * around ptwr_flush and the shadow-mode fixup ('+' lines) -- TODO:
 * confirm against the original patch before editing further.
 *
 * WARNING: the block comment opened below ("Attempt to read the PTE
 * ...") lost its closing delimiter along with several code lines; it
 * lexically swallows everything up to the stray comment terminator on
 * the domain_crash() line further down.  No annotations are inserted
 * inside that span, since adding any comment delimiter there would
 * change how the remainder lexes.  Recover the original file/patch
 * before attempting any functional change.
 */
l2_pgentry_t *pl2e;
int which, cpu = smp_processor_id();
u32 l2_idx;
/* NOTE(review): '-' diff residue -- the old code took the per-domain
 * big lock for the whole function; 'd' is otherwise undeclared here. */
- struct domain *d = current->domain;
- LOCK_BIGLOCK(d);
/*
* Attempt to read the PTE that maps the VA being accessed. By checking for
* PDE validity in the L2 we avoid many expensive fixups in __get_user().
_PAGE_PRESENT) ||
__get_user(pte, (unsigned long *)&linear_pg_table[addr>>PAGE_SHIFT]) )
{
- UNLOCK_BIGLOCK(d);
return 0;
}
if ( ((pte & (_PAGE_RW | _PAGE_PRESENT)) != _PAGE_PRESENT) ||
((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) )
{
- UNLOCK_BIGLOCK(d);
return 0;
}
l2_idx = page->u.inuse.type_info & PGT_va_mask;
if ( unlikely(l2_idx >= PGT_va_unknown) )
{
- UNLOCK_BIGLOCK(d);
domain_crash(); /* Urk! This L1 is mapped in multiple L2 slots! */
}
l2_idx >>= PGT_va_shift;
/* NOTE(review): tail of an error path -- the enclosing 'if' that led
 * here was lost from this chunk.  Tears down the per-CPU writable-PT
 * state for slot 'which' before crashing the domain. */
/* Toss the writable pagetable state and crash. */
unmap_domain_mem(ptwr_info[cpu].ptinfo[which].pl1e);
ptwr_info[cpu].ptinfo[which].l1va = 0;
- UNLOCK_BIGLOCK(d);
domain_crash();
}
- UNLOCK_BIGLOCK(d);
-
return EXCRET_fault_fixed;
}
/* NOTE(review): below is a second hunk, from the caller's fault path.
 * 'd', 'ed', 'addr' and 'regs' are function locals not visible in this
 * chunk.  The '+' lines take the big lock only around the writable-PT
 * fast path instead of the whole function. */
if ( likely(VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) )
{
+ LOCK_BIGLOCK(d);
if ( unlikely(ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va) &&
unlikely((addr >> L2_PAGETABLE_SHIFT) ==
ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l2_idx) )
{
- LOCK_BIGLOCK(d);
ptwr_flush(PTWR_PT_ACTIVE);
UNLOCK_BIGLOCK(d);
return EXCRET_fault_fixed;
/* NOTE(review): the '{' on the next line is presumably a corrupted
 * '}' (it should close the if-body above) -- verify against the
 * original file. */
{
if ( unlikely(ed->mm.shadow_mode) )
(void)shadow_fault(addr, regs->error_code);
+ UNLOCK_BIGLOCK(d);
return EXCRET_fault_fixed;
}
+ UNLOCK_BIGLOCK(d);
}
if ( unlikely(ed->mm.shadow_mode) &&